
2025 iThome 鐵人賽

DAY 23
Rust

Rust Hands-On Projects: 30 Progressive Projects from Tools to Services, Part 23

Memory Usage Profiler - Analyzing Program Memory Usage Patterns


Introduction

Starting today, the series turns to system tools, beginning with memory usage and analysis.
Today we build a memory profiler: it tracks allocations and deallocations, identifies potential
leaks, and generates statistical and visual reports from the collected data.

Today's Learning Goals

  • Track memory allocations and deallocations
  • Identify memory leaks
  • Analyze memory usage patterns
  • Generate statistical reports
  • Support multiple output formats

Dependencies

[package]
name = "memory-profiler"
version = "0.1.0"
edition = "2021"

[dependencies]
chrono = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
colored = "2.0"

Starting the Project

Data Structures

src/profiler.rs

use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryAllocation {
    pub address: usize,
    pub size: usize,
    pub timestamp: DateTime<Utc>,
    pub stack_trace: Vec<String>,
    pub tag: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryDeallocation {
    pub address: usize,
    pub size: usize,
    pub timestamp: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
    pub total_allocated: usize,
    pub total_deallocated: usize,
    pub current_usage: usize,
    pub peak_usage: usize,
    pub allocation_count: usize,
    pub deallocation_count: usize,
    pub leak_count: usize,
}

impl Default for MemoryStats {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            total_deallocated: 0,
            current_usage: 0,
            peak_usage: 0,
            allocation_count: 0,
            deallocation_count: 0,
            leak_count: 0,
        }
    }
}
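
Since all three structs derive Serialize and Deserialize, a snapshot of the statistics can be written out with serde_json directly; a quick sketch:

// Serialize a stats snapshot to pretty-printed JSON.
let stats = MemoryStats::default();
let json = serde_json::to_string_pretty(&stats).expect("serialize stats");
println!("{}", json);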

Profiler Core

src/profiler.rs

pub struct MemoryProfiler {
    allocations: Arc<Mutex<HashMap<usize, MemoryAllocation>>>,
    // Full allocation history (never pruned), so the usage timeline can be
    // rebuilt even after addresses have been freed.
    allocation_history: Arc<Mutex<Vec<MemoryAllocation>>>,
    deallocations: Arc<Mutex<Vec<MemoryDeallocation>>>,
    stats: Arc<Mutex<MemoryStats>>,
    enabled: Arc<Mutex<bool>>,
}

impl MemoryProfiler {
    pub fn new() -> Self {
        Self {
            allocations: Arc::new(Mutex::new(HashMap::new())),
            allocation_history: Arc::new(Mutex::new(Vec::new())),
            deallocations: Arc::new(Mutex::new(Vec::new())),
            stats: Arc::new(Mutex::new(MemoryStats::default())),
            enabled: Arc::new(Mutex::new(true)),
        }
    }

    pub fn enable(&self) {
        let mut enabled = self.enabled.lock().unwrap();
        *enabled = true;
    }

    pub fn disable(&self) {
        let mut enabled = self.enabled.lock().unwrap();
        *enabled = false;
    }

    pub fn is_enabled(&self) -> bool {
        *self.enabled.lock().unwrap()
    }

    pub fn track_allocation(
        &self,
        address: usize,
        size: usize,
        tag: String,
    ) -> Result<(), String> {
        if !self.is_enabled() {
            return Ok(());
        }

        let allocation = MemoryAllocation {
            address,
            size,
            timestamp: Utc::now(),
            stack_trace: self.capture_stack_trace(),
            tag,
        };

        // Keep a permanent record for the timeline, then index the live allocation.
        self.allocation_history.lock().unwrap().push(allocation.clone());

        let mut allocations = self.allocations.lock().unwrap();
        allocations.insert(address, allocation);

        let mut stats = self.stats.lock().unwrap();
        stats.total_allocated += size;
        stats.current_usage += size;
        stats.allocation_count += 1;

        if stats.current_usage > stats.peak_usage {
            stats.peak_usage = stats.current_usage;
        }

        Ok(())
    }

    pub fn track_deallocation(&self, address: usize) -> Result<(), String> {
        if !self.is_enabled() {
            return Ok(());
        }

        let mut allocations = self.allocations.lock().unwrap();
        
        if let Some(allocation) = allocations.remove(&address) {
            let deallocation = MemoryDeallocation {
                address,
                size: allocation.size,
                timestamp: Utc::now(),
            };

            let mut deallocations = self.deallocations.lock().unwrap();
            deallocations.push(deallocation);

            let mut stats = self.stats.lock().unwrap();
            stats.total_deallocated += allocation.size;
            stats.current_usage -= allocation.size;
            stats.deallocation_count += 1;

            Ok(())
        } else {
            Err(format!("Address {} not found in allocations", address))
        }
    }

    fn capture_stack_trace(&self) -> Vec<String> {
        // Simplified stack trace.
        // A real implementation could use the backtrace crate.
        vec![
            "main::function1".to_string(),
            "main::function2".to_string(),
        ]
    }
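
    // A possible real implementation of capture_stack_trace using the
    // `backtrace` crate (sketch only, assuming `backtrace = "0.3"` is added
    // to Cargo.toml; not used by the code above):
    //
    // fn capture_stack_trace(&self) -> Vec<String> {
    //     let bt = backtrace::Backtrace::new();
    //     bt.frames()
    //         .iter()
    //         .flat_map(|frame| frame.symbols())
    //         .filter_map(|symbol| symbol.name().map(|name| name.to_string()))
    //         .collect()
    // }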

    pub fn get_stats(&self) -> MemoryStats {
        let stats = self.stats.lock().unwrap();
        stats.clone()
    }

    pub fn detect_leaks(&self) -> Vec<MemoryAllocation> {
        let allocations = self.allocations.lock().unwrap();
        
        let mut leaks: Vec<MemoryAllocation> = allocations
            .values()
            .cloned()
            .collect();
        
        leaks.sort_by(|a, b| b.size.cmp(&a.size));

        let mut stats = self.stats.lock().unwrap();
        stats.leak_count = leaks.len();

        leaks
    }

    pub fn get_allocations_by_tag(&self, tag: &str) -> Vec<MemoryAllocation> {
        let allocations = self.allocations.lock().unwrap();
        
        allocations
            .values()
            .filter(|a| a.tag == tag)
            .cloned()
            .collect()
    }

    pub fn get_allocation_timeline(&self) -> Vec<(DateTime<Utc>, usize)> {
        let history = self.allocation_history.lock().unwrap();
        let deallocations = self.deallocations.lock().unwrap();

        let mut timeline = Vec::new();

        // Use the full allocation history: freed addresses are removed from
        // the live map, so iterating `allocations` here would miss them.
        for alloc in history.iter() {
            timeline.push((alloc.timestamp, alloc.size as isize));
        }

        for dealloc in deallocations.iter() {
            timeline.push((dealloc.timestamp, -(dealloc.size as isize)));
        }

        timeline.sort_by(|a, b| a.0.cmp(&b.0));

        let mut current_usage = 0usize;
        timeline
            .into_iter()
            .map(|(time, delta)| {
                if delta > 0 {
                    current_usage += delta as usize;
                } else {
                    current_usage = current_usage.saturating_sub((-delta) as usize);
                }
                (time, current_usage)
            })
            .collect()
    }

    pub fn reset(&self) {
        let mut allocations = self.allocations.lock().unwrap();
        allocations.clear();

        let mut history = self.allocation_history.lock().unwrap();
        history.clear();

        let mut deallocations = self.deallocations.lock().unwrap();
        deallocations.clear();

        let mut stats = self.stats.lock().unwrap();
        *stats = MemoryStats::default();
    }
}

impl Default for MemoryProfiler {
    fn default() -> Self {
        Self::new()
    }
}
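
The simulation in main.rs below feeds the profiler hand-made addresses. To profile real heap allocations, one option (not part of this project's code) is to wrap the global allocator and forward every alloc/dealloc to a process-wide profiler. A minimal sketch, assuming some global PROFILER handle and ignoring re-entrancy (the profiler itself allocates while recording):

use std::alloc::{GlobalAlloc, Layout, System};

// Wrapper allocator: delegates to the system allocator and reports each
// allocation/deallocation. Illustration only.
struct TrackingAllocator;

unsafe impl GlobalAlloc for TrackingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = System.alloc(layout);
        // e.g. PROFILER.track_allocation(ptr as usize, layout.size(), "global".to_string()).ok();
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // e.g. PROFILER.track_deallocation(ptr as usize).ok();
        System.dealloc(ptr, layout);
    }
}

#[global_allocator]
static GLOBAL: TrackingAllocator = TrackingAllocator;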

Report Generation

src/report.rs

use colored::*;

use crate::profiler::{MemoryAllocation, MemoryProfiler, MemoryStats};
use chrono::{DateTime, Utc};
use serde::Serialize;

pub struct ReportGenerator;

impl ReportGenerator {
    pub fn generate_text_report(profiler: &MemoryProfiler) -> String {
        let stats = profiler.get_stats();
        let leaks = profiler.detect_leaks();

        let mut report = String::new();
        
        report.push_str(&format!("\n{}\n", "=== Memory Profiler Report ===".bold().cyan()));
        report.push_str(&format!("\n{}\n", "Statistics:".bold()));
        report.push_str(&format!("  Total Allocated:   {} bytes\n", 
            Self::format_bytes(stats.total_allocated)));
        report.push_str(&format!("  Total Deallocated: {} bytes\n", 
            Self::format_bytes(stats.total_deallocated)));
        report.push_str(&format!("  Current Usage:     {} bytes\n", 
            Self::format_bytes(stats.current_usage).green()));
        report.push_str(&format!("  Peak Usage:        {} bytes\n", 
            Self::format_bytes(stats.peak_usage).yellow()));
        report.push_str(&format!("  Allocations:       {}\n", stats.allocation_count));
        report.push_str(&format!("  Deallocations:     {}\n", stats.deallocation_count));
        
        if !leaks.is_empty() {
            report.push_str(&format!("\n{}\n", "Memory Leaks Detected:".bold().red()));
            report.push_str(&format!("  Total Leaks: {}\n", leaks.len()));
            report.push_str(&format!("  Total Leaked: {} bytes\n\n", 
                Self::format_bytes(leaks.iter().map(|l| l.size).sum())));

            for (i, leak) in leaks.iter().take(10).enumerate() {
                report.push_str(&format!("  {}. Address: 0x{:x}, Size: {} bytes, Tag: {}\n",
                    i + 1, leak.address, leak.size, leak.tag));
            }

            if leaks.len() > 10 {
                report.push_str(&format!("  ... and {} more\n", leaks.len() - 10));
            }
        } else {
            report.push_str(&format!("\n{}\n", "No Memory Leaks Detected!".green()));
        }

        report
    }

    pub fn generate_json_report(profiler: &MemoryProfiler) -> Result<String, String> {
        let stats = profiler.get_stats();
        let leaks = profiler.detect_leaks();

        #[derive(Serialize)]
        struct JsonReport {
            stats: MemoryStats,
            leaks: Vec<MemoryAllocation>,
            timestamp: DateTime<Utc>,
        }

        let report = JsonReport {
            stats,
            leaks,
            timestamp: Utc::now(),
        };

        serde_json::to_string_pretty(&report)
            .map_err(|e| format!("Failed to serialize report: {}", e))
    }

    pub fn generate_timeline_report(profiler: &MemoryProfiler) -> String {
        let timeline = profiler.get_allocation_timeline();
        
        let mut report = String::new();
        report.push_str(&format!("\n{}\n", "Memory Usage Timeline:".bold()));
        
        for (time, usage) in timeline.iter().take(20) {
            let bar = Self::create_bar(usage, 1_000_000); // 1MB as max
            report.push_str(&format!("{} {} {}\n",
                time.format("%H:%M:%S"),
                bar,
                Self::format_bytes(*usage)));
        }

        if timeline.len() > 20 {
            report.push_str(&format!("... and {} more entries\n", timeline.len() - 20));
        }

        report
    }

    // Public so other modules (e.g. the tracker) can reuse it.
    pub fn format_bytes(bytes: usize) -> String {
        const KB: usize = 1024;
        const MB: usize = 1024 * KB;
        const GB: usize = 1024 * MB;

        if bytes >= GB {
            format!("{:.2} GB", bytes as f64 / GB as f64)
        } else if bytes >= MB {
            format!("{:.2} MB", bytes as f64 / MB as f64)
        } else if bytes >= KB {
            format!("{:.2} KB", bytes as f64 / KB as f64)
        } else {
            format!("{} B", bytes)
        }
    }

    fn create_bar(value: &usize, max: usize) -> String {
        let width = 30;
        let filled = ((*value as f64 / max as f64) * width as f64) as usize;
        let filled = filled.min(width);
        
        let bar = "█".repeat(filled) + &"░".repeat(width - filled);
        format!("[{}]", bar)
    }
}
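
A couple of optional unit tests can pin down format_bytes' rounding behaviour, e.g. at the bottom of src/report.rs:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_format_bytes() {
        assert_eq!(ReportGenerator::format_bytes(512), "512 B");
        assert_eq!(ReportGenerator::format_bytes(1024), "1.00 KB");
        assert_eq!(ReportGenerator::format_bytes(1536), "1.50 KB");
        assert_eq!(ReportGenerator::format_bytes(1024 * 1024), "1.00 MB");
    }
}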

Memory Tracker

src/tracker.rs

use std::sync::Arc;
use colored::*;

use crate::profiler::MemoryProfiler;
use crate::report::ReportGenerator;

pub struct SmartTracker {
    profiler: Arc<MemoryProfiler>,
    threshold_mb: usize,
    check_interval_secs: u64,
}

impl SmartTracker {
    pub fn new(profiler: Arc<MemoryProfiler>, threshold_mb: usize) -> Self {
        Self {
            profiler,
            threshold_mb,
            check_interval_secs: 5,
        }
    }

    pub fn start_monitoring(&self) {
        let profiler = Arc::clone(&self.profiler);
        let threshold_bytes = self.threshold_mb * 1024 * 1024;
        let interval = self.check_interval_secs;

        std::thread::spawn(move || {
            loop {
                std::thread::sleep(std::time::Duration::from_secs(interval));
                
                let stats = profiler.get_stats();
                
                if stats.current_usage > threshold_bytes {
                    println!("{}", 
                        format!("⚠️  Memory usage ({}) exceeded threshold ({} MB)",
                            ReportGenerator::format_bytes(stats.current_usage),
                            threshold_bytes / 1024 / 1024).yellow());
                    
                    let leaks = profiler.detect_leaks();
                    if !leaks.is_empty() {
                        println!("{}", 
                            format!("   Found {} potential memory leaks", leaks.len()).red());
                    }
                }
            }
        });
    }

    pub fn analyze_pattern(&self) -> MemoryPattern {
        let timeline = self.profiler.get_allocation_timeline();
        
        if timeline.is_empty() {
            return MemoryPattern::Stable;
        }

        let usages: Vec<usize> = timeline.iter().map(|(_, u)| *u).collect();
        let avg = usages.iter().sum::<usize>() / usages.len();
        let variance = usages.iter()
            .map(|&u| (u as f64 - avg as f64).powi(2))
            .sum::<f64>() / usages.len() as f64;

        if variance < (avg as f64 * 0.1) {
            MemoryPattern::Stable
        } else if usages.windows(2).all(|w| w[1] >= w[0]) {
            MemoryPattern::Growing
        } else if usages.windows(2).filter(|w| w[1] > w[0]).count() > usages.len() / 2 {
            MemoryPattern::Fluctuating
        } else {
            MemoryPattern::Declining
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryPattern {
    Stable,
    Growing,
    Declining,
    Fluctuating,
}

impl std::fmt::Display for MemoryPattern {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            MemoryPattern::Stable => write!(f, "Stable"),
            MemoryPattern::Growing => write!(f, "Growing (potential leak)"),
            MemoryPattern::Declining => write!(f, "Declining"),
            MemoryPattern::Fluctuating => write!(f, "Fluctuating"),
        }
    }
}
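
start_monitoring spawns a detached thread that loops forever, which is fine for a long-lived process but awkward in tests. One way to make it stoppable is to share an atomic flag between the tracker and the monitor thread; a minimal standalone sketch of the idea (the stop_flag and handle names are illustrative, not part of SmartTracker):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

let stop_flag = Arc::new(AtomicBool::new(false));
let flag = Arc::clone(&stop_flag);

let handle = std::thread::spawn(move || {
    while !flag.load(Ordering::Relaxed) {
        std::thread::sleep(std::time::Duration::from_secs(1));
        // ... check stats and print warnings as in start_monitoring ...
    }
});

// Later, to stop the monitor:
stop_flag.store(true, Ordering::Relaxed);
handle.join().ok();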

Main Program

src/main.rs

use colored::*;
use std::sync::Arc;

mod profiler;
mod report;
mod tracker;

use profiler::MemoryProfiler;
use report::ReportGenerator;
use tracker::{SmartTracker, MemoryPattern};

fn main() {
    println!("Memory Profiler - Analyzing Memory Usage Patterns\n");

    // Create the profiler
    let profiler = Arc::new(MemoryProfiler::new());

    // Start the smart monitor (threshold lowered to 3 MB so the ~5 MB of
    // simulated leaks actually triggers a warning)
    let tracker = SmartTracker::new(Arc::clone(&profiler), 3); // 3 MB threshold
    tracker.start_monitoring();

    // Simulate memory allocations
    simulate_memory_usage(&profiler);

    // Generate reports
    println!("{}", ReportGenerator::generate_text_report(&profiler));
    println!("{}", ReportGenerator::generate_timeline_report(&profiler));

    // Analyze the usage pattern
    let pattern = tracker.analyze_pattern();
    println!("\n{}: {}", "Memory Pattern".bold(), pattern);

    // Export a JSON report
    if let Ok(json) = ReportGenerator::generate_json_report(&profiler) {
        std::fs::write("memory_report.json", json)
            .expect("Failed to write report");
        println!("\n✓ JSON report saved to memory_report.json");
    }
}

fn simulate_memory_usage(profiler: &MemoryProfiler) {
    println!("Simulating memory allocations...\n");

    // Normal allocations
    for i in 0..100 {
        let size = 1024 * (i % 10 + 1);
        profiler.track_allocation(
            0x1000 + i * 0x100,
            size,
            "buffer".to_string(),
        ).ok();
    }

    // Free part of the memory
    for i in 0..80 {
        profiler.track_deallocation(0x1000 + i * 0x100).ok();
    }

    // Create some memory leaks
    for i in 0..5 {
        let size = 1024 * 1024; // 1 MB
        profiler.track_allocation(
            0x10000 + i * 0x1000,
            size,
            format!("leak_{}", i),
        ).ok();
    }

    // Give the monitor thread time for at least one 5-second check
    std::thread::sleep(std::time::Duration::from_secs(6));
}

// Usage examples as tests
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_memory_tracking() {
        let profiler = MemoryProfiler::new();

        profiler.track_allocation(0x1000, 1024, "test".to_string()).unwrap();
        profiler.track_allocation(0x2000, 2048, "test".to_string()).unwrap();

        let stats = profiler.get_stats();
        assert_eq!(stats.allocation_count, 2);
        assert_eq!(stats.current_usage, 3072);

        profiler.track_deallocation(0x1000).unwrap();
        let stats = profiler.get_stats();
        assert_eq!(stats.current_usage, 2048);
    }

    #[test]
    fn test_leak_detection() {
        let profiler = MemoryProfiler::new();

        profiler.track_allocation(0x1000, 1024, "leak".to_string()).unwrap();
        profiler.track_allocation(0x2000, 2048, "leak".to_string()).unwrap();

        let leaks = profiler.detect_leaks();
        assert_eq!(leaks.len(), 2);
    }

    #[test]
    fn test_pattern_analysis() {
        let profiler = Arc::new(MemoryProfiler::new());
        let tracker = SmartTracker::new(Arc::clone(&profiler), 10);

        for i in 0..10 {
            profiler.track_allocation(
                0x1000 + i * 0x100,
                1024,
                "pattern".to_string(),
            ).unwrap();
        }

        let pattern = tracker.analyze_pattern();
        // Usage grows monotonically with every allocation, so the pattern is Growing.
        assert_eq!(pattern, MemoryPattern::Growing);
    }
}

Using the Profiler

Basic Usage

let profiler = MemoryProfiler::new();

// Track an allocation
profiler.track_allocation(address, size, "buffer".to_string())?;

// Track a deallocation
profiler.track_deallocation(address)?;

// Get the statistics
let stats = profiler.get_stats();
println!("Current usage: {} bytes", stats.current_usage);

Memory Leak Detection

let leaks = profiler.detect_leaks();
for leak in leaks {
    println!("Leak at 0x{:x}: {} bytes ({})", 
        leak.address, leak.size, leak.tag);
}
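
The profiler also exposes get_allocations_by_tag, which none of the examples above call; a short sketch of per-tag accounting:

// Group usage by tag to see which subsystem currently holds the most memory.
let buffers = profiler.get_allocations_by_tag("buffer");
let total: usize = buffers.iter().map(|a| a.size).sum();
println!("'buffer' allocations: {} blocks, {} bytes", buffers.len(), total);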

Generating Reports

// Text report
let report = ReportGenerator::generate_text_report(&profiler);
println!("{}", report);

// JSON report
let json = ReportGenerator::generate_json_report(&profiler)?;
std::fs::write("report.json", json)?;

// Timeline report
let timeline = ReportGenerator::generate_timeline_report(&profiler);
println!("{}", timeline);
